nestedsvm: Support TSC Rate MSR
authorChristoph Egger <Christoph.Egger@amd.com>
Sat, 16 Jul 2011 08:23:22 +0000 (09:23 +0100)
committerChristoph Egger <Christoph.Egger@amd.com>
Sat, 16 Jul 2011 08:23:22 +0000 (09:23 +0100)
Support TSC Rate MSR and enable TSC scaling for
nested virtualization.

With it, guest VMs don't need to take a #VMEXIT to calculate a translated
TSC value when running under TSC emulation mode.

I measured native performance of the rdtsc instruction
in the l2 guest with xen-on-xen, with both the host and
the l1 guest running under TSC emulation mode.

TSC scaling just needs MSR emulation and correct tsc offset
calculation to be done and thus can be emulated also on older
hardware. In this case the rdtsc instruction is intercepted and
handled by the host directly, which saves the cost of a full
VMRUN/VMEXIT emulation cycle.

Signed-off-by: Christoph Egger <Christoph.Egger@amd.com>
tools/libxc/xc_cpuid_x86.c
xen/arch/x86/hvm/svm/nestedsvm.c
xen/arch/x86/hvm/svm/svm.c
xen/include/asm-x86/hvm/svm/nestedsvm.h

index d41f0b0eced86626567ed0f799c28f27a2179c1c..0796ae91d20821970520072fc7c7873d93581d4e 100644 (file)
@@ -157,7 +157,7 @@ static void amd_xc_cpuid_policy(
             SVM_FEATURE_DECODEASSISTS);
 
         /* Pass 2: Always enable SVM features which are emulated */
-        regs[3] |= SVM_FEATURE_VMCBCLEAN;
+        regs[3] |= SVM_FEATURE_VMCBCLEAN | SVM_FEATURE_TSCRATEMSR;
         break;
     }
 
index cb5e1a31ebd2762afafe8cb384cba8a67f767f7e..ba7683b25a3b5fe1cb42d9ed774c4d1bfdf39609 100644 (file)
@@ -147,6 +147,8 @@ int nsvm_vcpu_reset(struct vcpu *v)
     svm->ns_msr_hsavepa = VMCX_EADDR;
     svm->ns_ovvmcb_pa = VMCX_EADDR;
 
+    svm->ns_tscratio = DEFAULT_TSC_RATIO;
+
     svm->ns_cr_intercepts = 0;
     svm->ns_dr_intercepts = 0;
     svm->ns_exception_intercepts = 0;
@@ -1185,6 +1187,9 @@ int nsvm_rdmsr(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
     case MSR_K8_VM_HSAVE_PA:
         *msr_content = svm->ns_msr_hsavepa;
         break;
+    case MSR_AMD64_TSC_RATIO:
+        *msr_content = svm->ns_tscratio;
+        break;
     default:
         ret = 0;
         break;
@@ -1211,6 +1216,16 @@ int nsvm_wrmsr(struct vcpu *v, unsigned int msr, uint64_t msr_content)
         }
         svm->ns_msr_hsavepa = msr_content;
         break;
+    case MSR_AMD64_TSC_RATIO:
+        if ((msr_content & ~TSC_RATIO_RSVD_BITS) != msr_content) {
+            gdprintk(XENLOG_ERR,
+                "reserved bits set in MSR_AMD64_TSC_RATIO 0x%"PRIx64"\n",
+                msr_content);
+            ret = -1; /* inject #GP */
+            break;
+        }
+        svm->ns_tscratio = msr_content;
+        break;
     default:
         ret = 0;
         break;
index 00c546681df55d58e32e827291c3c83668e7ee81..8d9ee8747ff40e1e90bad012e7a0c76ee5f66bb4 100644 (file)
@@ -635,28 +635,37 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         svm_vmload(vmcb);
 }
 
+static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
+    uint64_t ratio)
+{
+    uint64_t offset;
+
+    if (ratio == DEFAULT_TSC_RATIO)
+        return guest_tsc - host_tsc;
+
+    /* calculate hi,lo parts in 64bits to prevent overflow */
+    offset = (((host_tsc >> 32U) * (ratio >> 32U)) << 32U) +
+          (host_tsc & 0xffffffffULL) * (ratio & 0xffffffffULL);
+    return guest_tsc - offset;
+}
+
 static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     struct vmcb_struct *n1vmcb, *n2vmcb;
     uint64_t n2_tsc_offset = 0;
     struct domain *d = v->domain;
+    uint64_t host_tsc, guest_tsc;
 
-    if ( !nestedhvm_enabled(d) ) {
-        /* Re-adjust the offset value when TSC_RATIO is available */
-        if ( cpu_has_tsc_ratio && d->arch.vtsc )
-        {
-            uint64_t host_tsc, guest_tsc;
-
-            rdtscll(host_tsc);
-            guest_tsc = hvm_get_guest_tsc(v);
-            
-            /* calculate hi,lo parts in 64bits to prevent overflow */
-            offset = (((host_tsc >> 32) * d->arch.tsc_khz / cpu_khz) << 32) +
-                     (host_tsc & 0xffffffffULL) * d->arch.tsc_khz / cpu_khz;
-            offset = guest_tsc - offset;
-        }
+    guest_tsc = hvm_get_guest_tsc(v);
+
+    /* Re-adjust the offset value when TSC_RATIO is available */
+    if ( cpu_has_tsc_ratio && d->arch.vtsc ) {
+        rdtscll(host_tsc);
+        offset = svm_get_tsc_offset(host_tsc, guest_tsc, vcpu_tsc_ratio(v));
+    }
 
+    if ( !nestedhvm_enabled(d) ) {
         vmcb_set_tsc_offset(vmcb, offset);
         return;
     }
@@ -665,8 +674,14 @@ static void svm_set_tsc_offset(struct vcpu *v, u64 offset)
     n2vmcb = vcpu_nestedhvm(v).nv_n2vmcx;
 
     if ( nestedhvm_vcpu_in_guestmode(v) ) {
+        struct nestedsvm *svm = &vcpu_nestedsvm(v);
+
         n2_tsc_offset = vmcb_get_tsc_offset(n2vmcb) -
             vmcb_get_tsc_offset(n1vmcb);
+        if ( svm->ns_tscratio != DEFAULT_TSC_RATIO ) {
+            n2_tsc_offset = svm_get_tsc_offset(guest_tsc,
+                guest_tsc + n2_tsc_offset, svm->ns_tscratio);
+        }
         vmcb_set_tsc_offset(n1vmcb, offset);
     }
 
@@ -1107,6 +1122,7 @@ struct hvm_function_table * __init start_svm(void)
     P(cpu_has_svm_cleanbits, "VMCB Clean Bits");
     P(cpu_has_svm_decode, "DecodeAssists");
     P(cpu_has_pause_filter, "Pause-Intercept Filter");
+    P(cpu_has_tsc_ratio, "TSC Rate MSR");
 #undef P
 
     if ( !printed )
index ec8114643cc16b81b9f201d72701d8fb72859403..65c13211a2f9c05e303dcae522ca045b21bb8050 100644 (file)
@@ -36,6 +36,11 @@ struct nestedsvm {
      */
     uint64_t ns_ovvmcb_pa;
 
+    /* virtual tscratio holding the value l1 guest writes to the
+     * MSR_AMD64_TSC_RATIO MSR.
+     */
+    uint64_t ns_tscratio;
+
     /* Cached real intercepts of the l2 guest */
     uint32_t ns_cr_intercepts;
     uint32_t ns_dr_intercepts;